x86: Do not read/write EFER MSR if it doesn't exist.
author: Keir Fraser <keir@xensource.com>
Tue, 13 Nov 2007 20:08:39 +0000 (20:08 +0000)
committer: Keir Fraser <keir@xensource.com>
Tue, 13 Nov 2007 20:08:39 +0000 (20:08 +0000)
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/acpi/power.c
xen/arch/x86/boot/head.S
xen/arch/x86/setup.c
xen/arch/x86/smpboot.c
xen/arch/x86/x86_32/asm-offsets.c
xen/arch/x86/x86_64/asm-offsets.c
xen/include/asm-x86/cpufeature.h

index 7a24e35d2a6dde1c1775a460e1f15e48ca7fd42b..f70bf096fce7dcc0d1c04b883f92588c5581bda5 100644 (file)
@@ -166,7 +166,8 @@ static int enter_state(u32 state)
 
     /* Restore CR4 and EFER from cached values. */
     write_cr4(read_cr4());
-    write_efer(read_efer());
+    if ( cpu_has_efer )
+        write_efer(read_efer());
 
     device_power_up();
 
index 168bedefd4124852da04446580392d2905f57d28..775c89a45b58b6c716c312e1dfd0611bb970ed5b 100644 (file)
@@ -98,6 +98,7 @@ __start:
         mov     $0x80000001,%eax
         cpuid
 1:      mov     %edx,sym_phys(cpuid_ext_features)
+        mov     %edx,sym_phys(boot_cpu_data)+CPUINFO_ext_features
 
 #if defined(__x86_64__)
         /* Check for availability of long mode. */
index 733ff2974a0c7b4785659ff564d9da731e239a45..5ab40978b41bd26604b432993cbd916d8ea4e3ae 100644 (file)
@@ -416,7 +416,8 @@ void __init __start_xen(unsigned long mbi_p)
     set_current((struct vcpu *)0xfffff000); /* debug sanity */
     idle_vcpu[0] = current;
     set_processor_id(0); /* needed early, for smp_processor_id() */
-    rdmsrl(MSR_EFER, this_cpu(efer));
+    if ( cpu_has_efer )
+        rdmsrl(MSR_EFER, this_cpu(efer));
     asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
 
     smp_prepare_boot_cpu();
index 101918ac8f5f85e1d2fd029e9c1c3b8232629564..32668e9455d5f5fa097aee93035064f24aa76ef8 100644 (file)
@@ -495,7 +495,8 @@ void __devinit start_secondary(void *unused)
        set_processor_id(cpu);
        set_current(idle_vcpu[cpu]);
        this_cpu(curr_vcpu) = idle_vcpu[cpu];
-       rdmsrl(MSR_EFER, this_cpu(efer));
+        if ( cpu_has_efer )
+            rdmsrl(MSR_EFER, this_cpu(efer));
        asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
 
        percpu_traps_init();
index 2b757c18e29a2865da5b9f937c27dd07376efbbe..92b25ad9a44eafc88e7d646e97f9a94bc68e9c1c 100644 (file)
@@ -115,4 +115,7 @@ void __dummy__(void)
     BLANK();
 
     DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
+    BLANK();
+
+    OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
 }
index 9bb41bc79451f28baa758f7b52ba856bced4fd60..e91dbc1f51caf35d773ca8fdb8e4a5bdcb9d843f 100644 (file)
@@ -137,4 +137,7 @@ void __dummy__(void)
 #endif
 
     DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
+    BLANK();
+
+    OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
 }
index 62b17e4a80d108db2aed35d9f132fca8cfc0fafc..58c1f9e41f5a914b377a73f86a559e812c0cb4c3 100644 (file)
 #define cpu_has_centaur_mcr    boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
 #define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
 #define cpu_has_page1gb                0
+#define cpu_has_efer           (boot_cpu_data.x86_capability[1] & 0x20100800)
 #else /* __x86_64__ */
 #define cpu_has_vme            0
 #define cpu_has_de             1
 #define cpu_has_centaur_mcr    0
 #define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
 #define cpu_has_page1gb                boot_cpu_has(X86_FEATURE_PAGE1GB)
+#define cpu_has_efer           1
 #endif
 
 #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) \